40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.11-xen-sparse/include/asm-xen/linux-public/privcmd.h
3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.11-xen-sparse/include/asm-xen/linux-public/suspend.h
-40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.11-xen-sparse/include/asm-xen/queues.h
3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h
419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.11-xen-sparse/include/linux/gfp.h
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/ldt.h>
pc->ldt,
(pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
load_LDT(pc);
- flush_page_update_queue();
#ifdef CONFIG_SMP
if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
smp_call_function(flush_ldt, 0, 1, 1);
}
wmb();
if (oldsize) {
+ make_pages_writable(
+ oldldt, (oldsize*LDT_ENTRY_SIZE)/PAGE_SIZE);
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
}
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
- flush_page_update_queue();
return 0;
}
void destroy_context(struct mm_struct *mm)
{
if (mm->context.size) {
+ if (mm_state_sync & STATE_SYNC_LDT)
+ clear_LDT();
make_pages_writable(
mm->context.ldt,
(mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
- flush_page_update_queue();
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
-#include <asm/multicall.h>
#include <asm-xen/xen-public/physdev.h>
#include <linux/irq.h>
{
struct thread_struct *next = &next_p->thread;
physdev_op_t op;
+ multicall_entry_t _mcl[8], *mcl = _mcl;
+ mmu_update_t _mmu[2], *mmu = _mmu;
- __cli();
+ if ( mm_state_sync & STATE_SYNC_PT )
+ {
+ mmu->ptr = virt_to_machine(cur_pgd) | MMU_EXTENDED_COMMAND;
+ mmu->val = MMUEXT_NEW_BASEPTR;
+ mmu++;
+ }
- /*
- * We clobber FS and GS here so that we avoid a GPF when restoring previous
- * task's FS/GS values in Xen when the LDT is switched. If we don't do this
- * then we can end up erroneously re-flushing the page-update queue when
- * we 'execute_multicall_list'.
- */
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
+ if ( mm_state_sync & STATE_SYNC_LDT )
+ {
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
+ mmu->ptr = (unsigned long)next_p->mm->context.ldt |
+ MMU_EXTENDED_COMMAND;
+ mmu->val = (next_p->mm->context.size << MMUEXT_CMD_SHIFT) |
+ MMUEXT_SET_LDT;
+ mmu++;
+ }
- MULTICALL_flush_page_update_queue();
+ if ( mm_state_sync != 0 )
+ {
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)_mmu;
+ mcl->args[1] = mmu - _mmu;
+ mcl->args[2] = 0;
+ mcl++;
+ mm_state_sync = 0;
+ }
/*
* This is basically 'unlazy_fpu', except that we queue a multicall to
asm volatile( "fnsave %0 ; fwait"
: "=m" (prev_p->thread.i387.fsave) );
prev_p->flags &= ~PF_USEDFPU;
- queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+ mcl++;
}
- queue_multicall2(__HYPERVISOR_stack_switch, __KERNEL_DS, next->esp0);
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = __KERNEL_DS;
+ mcl->args[1] = next->esp0;
+ mcl++;
if ( prev_p->thread.io_pl != next->io_pl )
{
op.cmd = PHYSDEVOP_SET_IOPL;
op.u.set_iopl.iopl = next->io_pl;
- queue_multicall1(__HYPERVISOR_physdev_op, (unsigned long)&op);
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&op;
+ mcl++;
}
- /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
- execute_multicall_list();
- __sti();
+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
/*
* Restore %fs and %gs.
unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
-DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-DEFINE_PER_CPU(int, nr_multicall_ents);
-
/*
* Machine setup..
*/
HYPERVISOR_stack_switch(__KERNEL_DS, current->thread.esp0);
load_LDT(&init_mm.context);
- flush_page_update_queue();
/* Force FPU initialization. */
current->flags &= ~PF_USEDFPU;
extern void die(const char *,struct pt_regs *,long);
pgd_t *cur_pgd;
+int mm_state_sync;
extern spinlock_t timerlist_lock;
extern struct desc_struct default_ldt[];
+static inline void clear_LDT(void)
+{
+ xen_set_ldt(0, 0);
+}
+
static inline void load_LDT(mm_context_t *pc)
{
void *segments = pc->ldt;
#endif
extern pgd_t *cur_pgd;
+extern int mm_state_sync;
+#define STATE_SYNC_PT 1
+#define STATE_SYNC_LDT 2
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
if (prev != next) {
/* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- cpu_tlbstate[cpu].active_mm = next;
-#endif
-
/* Re-load page tables */
cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- /* load_LDT, if either the previous or next thread
- * has a non-default LDT.
- */
- if (next->context.size+prev->context.size)
- load_LDT(&next->context);
+ mm_state_sync |= STATE_SYNC_PT;
+ /* load_LDT, if either the previous or next thread
+ * has a non-default LDT.
+ */
+ if (next->context.size+prev->context.size)
+ mm_state_sync |= STATE_SYNC_LDT;
}
-#ifdef CONFIG_SMP
- else {
- cpu_tlbstate[cpu].state = TLBSTATE_OK;
- if(cpu_tlbstate[cpu].active_mm != next)
- out_of_line_bug();
- if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
- cur_pgd = next->pgd;
- queue_pt_switch(__pa(cur_pgd));
- load_LDT(next);
- }
- }
-#endif
}
-#define activate_mm(prev, next) \
-do { \
- switch_mm((prev),(next),NULL,smp_processor_id()); \
- flush_page_update_queue(); \
+#define activate_mm(prev, next) \
+do { \
+ switch_mm((prev),(next),NULL,smp_processor_id()); \
+ if (mm_state_sync & STATE_SYNC_PT) \
+ xen_pt_switch(__pa(cur_pgd)); \
+ if (mm_state_sync & STATE_SYNC_LDT) \
+ load_LDT(&(next)->context); \
+ mm_state_sync = 0; \
} while ( 0 )
#endif
ln -sf ../../${LINUX_26}/include/asm-xen/evtchn.h
ln -sf ../../${LINUX_26}/include/asm-xen/gnttab.h
ln -sf ../../${LINUX_26}/include/asm-xen/hypervisor.h
-ln -sf ../../${LINUX_26}/include/asm-xen/multicall.h
ln -sf ../../${LINUX_26}/include/asm-xen/xen_proc.h
ln -sf ../../${LINUX_26}/include/asm-xen/asm-i386/synch_bitops.h
frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
make_page_readonly((void *)va);
}
- flush_page_update_queue();
if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
BUG();
lgdt_finish();
load_esp0(t, thread);
load_LDT(&init_mm.context);
- flush_page_update_queue();
/* Clear %fs and %gs. */
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
static void flush_ldt(void *null)
{
- if (current->active_mm) {
+ if (current->active_mm)
		load_LDT(&current->active_mm->context);
- flush_page_update_queue();
- }
}
#endif
make_pages_readonly(pc->ldt, (pc->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
load_LDT(pc);
- flush_page_update_queue();
#ifdef CONFIG_SMP
mask = cpumask_of_cpu(smp_processor_id());
if (!cpus_equal(current->mm->cpu_vm_mask, mask))
if (oldsize) {
make_pages_writable(oldldt, (oldsize * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(oldldt);
else
memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
make_pages_readonly(new->ldt, (new->size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
return 0;
}
make_pages_writable(mm->context.ldt,
(mm->context.size * LDT_ENTRY_SIZE) /
PAGE_SIZE);
- flush_page_update_queue();
if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
vfree(mm->context.ldt);
else
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn = pte->pte_low >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
+ HYPERVISOR_update_va_mapping(
+ vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
- flush_page_update_queue();
if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
&pfn, 1, 0) != 1) BUG();
}
pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
- queue_l1_entry_update(
- pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
- queue_machphys_update(
+ HYPERVISOR_update_va_mapping(
+ vstart + (i*PAGE_SIZE),
+ __pte_ma(((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0);
+ xen_machphys_update(
pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
pfn+i;
}
- /* Flush updates through and flush the TLB. */
xen_tlb_flush();
balloon_unlock(flags);
#include <asm/i387.h>
#include <asm/irq.h>
#include <asm/desc.h>
-#include <asm-xen/multicall.h>
#include <asm-xen/xen-public/physdev.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
physdev_op_t iopl_op, iobmp_op;
-
- /* NB. No need to disable interrupts as already done in sched.c */
- /* __cli(); */
+ multicall_entry_t _mcl[8], *mcl = _mcl;
/*
* Save away %fs and %gs. No need to save %es and %ds, as
"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
"eax" );
- MULTICALL_flush_page_update_queue();
-
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
/*
*/
if (prev_p->thread_info->status & TS_USEDFPU) {
__save_init_fpu(prev_p); /* _not_ save_init_fpu() */
- queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
+ mcl->op = __HYPERVISOR_fpu_taskswitch;
+ mcl->args[0] = 1;
+ mcl++;
}
/*
* This is load_esp0(tss, next) with a multicall.
*/
tss->esp0 = next->esp0;
- queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+ mcl->op = __HYPERVISOR_stack_switch;
+ mcl->args[0] = tss->ss0;
+ mcl->args[1] = tss->esp0;
+ mcl++;
/*
* Load the per-thread Thread-Local Storage descriptor.
* This is load_TLS(next, cpu) with multicalls.
*/
-#define C(i) do { \
- if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
- next->tls_array[i].b != prev->tls_array[i].b)) \
- queue_multicall3(__HYPERVISOR_update_descriptor, \
- virt_to_machine(&get_cpu_gdt_table(cpu) \
- [GDT_ENTRY_TLS_MIN + i]), \
- ((u32 *)&next->tls_array[i])[0], \
- ((u32 *)&next->tls_array[i])[1]); \
+#define C(i) do { \
+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
+ next->tls_array[i].b != prev->tls_array[i].b)) { \
+ mcl->op = __HYPERVISOR_update_descriptor; \
+ mcl->args[0] = virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]); \
+ mcl->args[1] = ((u32 *)&next->tls_array[i])[0]; \
+ mcl->args[2] = ((u32 *)&next->tls_array[i])[1]; \
+ mcl++; \
+ } \
} while (0)
C(0); C(1); C(2);
#undef C
if (unlikely(prev->io_pl != next->io_pl)) {
iopl_op.cmd = PHYSDEVOP_SET_IOPL;
iopl_op.u.set_iopl.iopl = next->io_pl;
- queue_multicall1(__HYPERVISOR_physdev_op,
- (unsigned long)&iopl_op);
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iopl_op;
+ mcl++;
}
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
(unsigned long)next->io_bitmap_ptr;
iobmp_op.u.set_iobitmap.nr_ports =
next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
- queue_multicall1(__HYPERVISOR_physdev_op,
- (unsigned long)&iobmp_op);
+ mcl->op = __HYPERVISOR_physdev_op;
+ mcl->args[0] = (unsigned long)&iobmp_op;
+ mcl++;
}
- /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
- execute_multicall_list();
- /* __sti(); */
+ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
/*
* Restore %fs and %gs if needed.
unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
EXPORT_SYMBOL(phys_to_machine_mapping);
-DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
-DEFINE_PER_CPU(int, nr_multicall_ents);
-
/* Raw start-of-day parameters from the hypervisor. */
union xen_start_info_union xen_start_info_union;
make_page_readonly((void *)va);
}
ctxt.gdt_ents = cpu_gdt_descr[cpu].size / 8;
- flush_page_update_queue();
}
/* Ring 1 stack is the initial stack. */
* and a callgate to lcall27 for Solaris/x86 binaries
*/
make_lowmem_page_readonly(&default_ldt[0]);
- flush_page_update_queue();
/*
* Should be a barrier for any external CPU state.
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>
-#include <asm-xen/multicall.h>
#include <asm-xen/balloon.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/percpu.h>
#endif
-/*
- * This suffices to protect us if we ever move to SMP domains.
- * Further, it protects us against interrupts. At the very least, this is
- * required for the network driver which flushes the update queue before
- * pushing new receive buffers.
- */
-static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
-
-#define QUEUE_SIZE 128
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pte_offset_kernel pte_offset
#define pud_t pgd_t
#define pmd_val_ma(v) (v).pud.pgd.pgd;
#endif
-DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
-DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
-
-/*
- * MULTICALL_flush_page_update_queue:
- * This is a version of the flush which queues as part of a multicall.
- */
-void MULTICALL_flush_page_update_queue(void)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- unsigned int _idx;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- if ( (_idx = idx) != 0 )
- {
- per_cpu(mmu_update_queue_idx, cpu) = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- queue_multicall3(__HYPERVISOR_mmu_update,
- (unsigned long)&per_cpu(update_queue[0], cpu),
- (unsigned long)_idx,
- (unsigned long)NULL);
- }
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void __flush_page_update_queue(void)
-{
- int cpu = smp_processor_id();
- unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(mmu_update_queue_idx, cpu) = 0;
- wmb(); /* Make sure index is cleared first to avoid double updates. */
- if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu), _idx, NULL) < 0) )
- {
- printk(KERN_ALERT "Failed to execute MMU updates.\n");
- BUG();
- }
-}
-
-void _flush_page_update_queue(void)
-{
- int cpu = smp_processor_id();
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- if ( per_cpu(mmu_update_queue_idx, cpu) != 0 ) __flush_page_update_queue();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-static inline void increment_index(void)
-{
- int cpu = smp_processor_id();
- per_cpu(mmu_update_queue_idx, cpu)++;
- if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) ) __flush_page_update_queue();
-}
-
-static inline void increment_index_and_flush(void)
-{
- int cpu = smp_processor_id();
- per_cpu(mmu_update_queue_idx, cpu)++;
- __flush_page_update_queue();
-}
-
-void queue_l1_entry_update(pte_t *ptr, unsigned long val)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pt_switch(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_tlb_flush(void)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_invlpg(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_pin(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pgd_unpin(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_pin(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_pte_unpin(unsigned long ptr)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_set_ldt(unsigned long ptr, unsigned long len)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-void queue_machphys_update(unsigned long mfn, unsigned long pfn)
-{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- per_cpu(update_queue[idx], cpu).val = pfn;
- increment_index();
- spin_unlock_irqrestore(&update_lock, flags);
-}
-
-/* queue and flush versions of the above */
void xen_l1_entry_update(pte_t *ptr, unsigned long val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = val;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = val;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = virt_to_machine(ptr);
+ u.val = pmd_val_ma(val);
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_pt_switch(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_NEW_BASEPTR;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_tlb_flush(void)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_TLB_FLUSH;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_invlpg(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = (ptr & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_INVLPG;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_pgd_pin(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_PIN_L2_TABLE;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_pgd_unpin(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_UNPIN_TABLE;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_pte_pin(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_PIN_L1_TABLE;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_pte_unpin(unsigned long ptr)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
- per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = phys_to_machine(ptr) | MMU_EXTENDED_COMMAND;
+ u.val = MMUEXT_UNPIN_TABLE;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_set_ldt(unsigned long ptr, unsigned long len)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
- per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = ptr | MMU_EXTENDED_COMMAND;
+ u.val = (len << MMUEXT_CMD_SHIFT) | MMUEXT_SET_LDT;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
void xen_machphys_update(unsigned long mfn, unsigned long pfn)
{
- int cpu = smp_processor_id();
- int idx;
- unsigned long flags;
- spin_lock_irqsave(&update_lock, flags);
- idx = per_cpu(mmu_update_queue_idx, cpu);
- per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- per_cpu(update_queue[idx], cpu).val = pfn;
- increment_index_and_flush();
- spin_unlock_irqrestore(&update_lock, flags);
+ mmu_update_t u;
+ u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ u.val = pfn;
+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL) < 0);
}
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
- queue_l1_entry_update(pte, 0);
+ HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), __pte_ma(0), 0);
phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
}
- /* Flush updates through and flush the TLB. */
xen_tlb_flush();
balloon_put_pages(pfn_array, 1 << order);
make_page_readonly(new_pgd);
xen_pgd_pin(__pa(new_pgd));
load_cr3(new_pgd);
- flush_page_update_queue();
xen_pgd_unpin(__pa(old_pgd));
make_page_writable(old_pgd);
__flush_tlb_all();
return page;
}
-static inline pte_t *get_ptep(unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = pgd_offset_k(addr);
- if ( pgd_none(*pgd) || pgd_bad(*pgd) ) BUG();
-
- pud = pud_offset(pgd, addr);
- if ( pud_none(*pud) || pud_bad(*pud) ) BUG();
-
- pmd = pmd_offset(pud, addr);
- if ( pmd_none(*pmd) || pmd_bad(*pmd) ) BUG();
-
- return pte_offset_kernel(pmd, addr);
-}
-
static void balloon_alarm(unsigned long unused)
{
schedule_work(&balloon_worker);
/* Update P->M and M->P tables. */
phys_to_machine_mapping[pfn] = mfn_list[i];
- queue_machphys_update(mfn_list[i], pfn);
+ xen_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if it's not a highmem page. */
if ( pfn < max_low_pfn )
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
- (mfn_list[i] << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
-
+ {
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ __pte_ma((mfn_list[i] << PAGE_SHIFT) |
+ pgprot_val(PAGE_KERNEL)),
+ 0);
+ }
+
/* Finally, relinquish the memory back to the system allocator. */
ClearPageReserved(page);
set_page_count(page, 1);
{
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
- queue_l1_entry_update(get_ptep((unsigned long)v), 0);
+ HYPERVISOR_update_va_mapping(
+ (unsigned long)v, __pte_ma(0), 0);
}
#ifdef CONFIG_XEN_SCRUB_PAGES
else
/* Ensure that ballooned highmem pages don't have cached mappings. */
kmap_flush_unused();
- /* Flush updates through and flush the TLB. */
xen_tlb_flush();
/* No more mappings: invalidate pages in P2M and add to balloon. */
* it slows down context switching. Noone uses it anyway.
*/
cpu = cpu; /* XXX avoid compiler warning */
- queue_set_ldt(0UL, 0);
+ xen_set_ldt(0UL, 0);
put_cpu();
}
if (likely(!count))
segments = NULL;
- queue_set_ldt((unsigned long)segments, count);
+ xen_set_ldt((unsigned long)segments, count);
}
static inline void load_LDT(mm_context_t *pc)
#define activate_mm(prev, next) do { \
switch_mm((prev),(next),NULL); \
- flush_page_update_queue(); \
} while (0)
#endif
#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
#define pmd_populate_kernel(mm, pmd, pte) \
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
#define pmd_populate(mm, pmd, pte) do { \
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT))); \
- flush_page_update_queue(); \
} while (0)
/*
* Allocate and free page tables.
{
free_page((unsigned long)pte);
make_page_writable(pte);
- flush_page_update_queue();
}
extern void pte_free(struct page *pte);
} \
} while (0)
-/* NOTE: make_page* callers must call flush_page_update_queue() */
void make_lowmem_page_readonly(void *va);
void make_lowmem_page_writable(void *va);
void make_page_readonly(void *va);
}
#define load_cr3(pgdir) do { \
- queue_pt_switch(__pa(pgdir)); \
+ xen_pt_switch(__pa(pgdir)); \
per_cpu(cur_pgd, smp_processor_id()) = pgdir; \
} while (/* CONSTCOND */0)
* be MACHINE addresses.
*/
-void queue_l1_entry_update(pte_t *ptr, unsigned long val);
-void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
-void queue_pt_switch(unsigned long ptr);
-void queue_tlb_flush(void);
-void queue_invlpg(unsigned long ptr);
-void queue_pgd_pin(unsigned long ptr);
-void queue_pgd_unpin(unsigned long ptr);
-void queue_pte_pin(unsigned long ptr);
-void queue_pte_unpin(unsigned long ptr);
-void queue_set_ldt(unsigned long ptr, unsigned long bytes);
-void queue_machphys_update(unsigned long mfn, unsigned long pfn);
void xen_l1_entry_update(pte_t *ptr, unsigned long val);
void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
void xen_pt_switch(unsigned long ptr);
void xen_set_ldt(unsigned long ptr, unsigned long bytes);
void xen_machphys_update(unsigned long mfn, unsigned long pfn);
-void _flush_page_update_queue(void);
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/*
** XXX SMH: 2.4 doesn't have percpu.h (or support SMP guests) so just
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
#endif /* linux < 2.6.0 */
-#define flush_page_update_queue() do { \
- DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx); \
- if (per_cpu(mmu_update_queue_idx, smp_processor_id())) \
- _flush_page_update_queue(); \
-} while (0)
-void MULTICALL_flush_page_update_queue(void);
-
#ifdef CONFIG_XEN_PHYSDEV_ACCESS
/* Allocate a contiguous empty region of low memory. Return virtual start. */
unsigned long allocate_empty_lowmem_region(unsigned long pages);
+++ /dev/null
-/******************************************************************************
- * multicall.h
- *
- * Copyright (c) 2003-2004, K A Fraser
- *
- * This file may be distributed separately from the Linux kernel, or
- * incorporated into other software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __MULTICALL_H__
-#define __MULTICALL_H__
-
-#include <asm-xen/hypervisor.h>
-
-DECLARE_PER_CPU(multicall_entry_t, multicall_list[]);
-DECLARE_PER_CPU(int, nr_multicall_ents);
-
-static inline void queue_multicall0(unsigned long op)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall1(unsigned long op, unsigned long arg1)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(multicall_list[i], cpu).args[0] = arg1;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall2(
- unsigned long op, unsigned long arg1, unsigned long arg2)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(multicall_list[i], cpu).args[0] = arg1;
- per_cpu(multicall_list[i], cpu).args[1] = arg2;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall3(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(multicall_list[i], cpu).args[0] = arg1;
- per_cpu(multicall_list[i], cpu).args[1] = arg2;
- per_cpu(multicall_list[i], cpu).args[2] = arg3;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall4(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(multicall_list[i], cpu).args[0] = arg1;
- per_cpu(multicall_list[i], cpu).args[1] = arg2;
- per_cpu(multicall_list[i], cpu).args[2] = arg3;
- per_cpu(multicall_list[i], cpu).args[3] = arg4;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void queue_multicall5(
- unsigned long op, unsigned long arg1, unsigned long arg2,
- unsigned long arg3, unsigned long arg4, unsigned long arg5)
-{
- int cpu = smp_processor_id();
- int i = per_cpu(nr_multicall_ents, cpu);
- per_cpu(multicall_list[i], cpu).op = op;
- per_cpu(multicall_list[i], cpu).args[0] = arg1;
- per_cpu(multicall_list[i], cpu).args[1] = arg2;
- per_cpu(multicall_list[i], cpu).args[2] = arg3;
- per_cpu(multicall_list[i], cpu).args[3] = arg4;
- per_cpu(multicall_list[i], cpu).args[4] = arg5;
- per_cpu(nr_multicall_ents, cpu) = i+1;
-}
-
-static inline void execute_multicall_list(void)
-{
- int cpu = smp_processor_id();
- if ( unlikely(per_cpu(nr_multicall_ents, cpu) == 0) ) return;
- (void)HYPERVISOR_multicall(&per_cpu(multicall_list[0], cpu),
- per_cpu(nr_multicall_ents, cpu));
- per_cpu(nr_multicall_ents, cpu) = 0;
-}
-
-#endif /* __MULTICALL_H__ */